#include <xen/keyhandler.h>
#include <xen/cpuidle.h>
#include <xen/trace.h>
+#include <xen/sched-if.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
}
}
+/*
+ * A vCPU is "urgent" while it is polling an event channel (i.e. it is
+ * listed in its domain's poll_mask and expects a low-latency wakeup).
+ *
+ * If an urgent vCPU is assigned to this CPU, the idle handler should
+ * avoid entering deep C-states, as the exit latency would delay the
+ * poll completion.
+ *
+ * Returns non-zero iff this CPU currently has at least one urgent vCPU
+ * (reads the per-CPU urgent_count maintained by vcpu_urgent_count_update()).
+ */
+static int sched_has_urgent_vcpu(void)
+{
+    return atomic_read(&this_cpu(schedule_data).urgent_count);
+}
+
static void acpi_processor_idle(void)
{
struct acpi_processor_power *power = processor_powers[smp_processor_id()];
u32 exp = 0, pred = 0;
u32 irq_traced[4] = { 0 };
- cpufreq_dbs_timer_suspend();
-
- sched_tick_suspend();
- /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
- process_pending_softirqs();
-
- /*
- * Interrupts must be disabled during bus mastering calculations and
- * for C2/C3 transitions.
- */
- local_irq_disable();
-
- if ( softirq_pending(smp_processor_id()) )
- {
- local_irq_enable();
- sched_tick_resume();
- cpufreq_dbs_timer_resume();
- return;
- }
-
- if ( max_cstate > 0 && power &&
+ if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
(next_state = cpuidle_current_governor->select(power)) > 0 )
{
cx = &power->states[next_state];
pm_idle_save();
else
acpi_safe_halt();
+ return;
+ }
+
+ cpufreq_dbs_timer_suspend();
+
+ sched_tick_suspend();
+ /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
+ process_pending_softirqs();
+
+ /*
+ * Interrupts must be disabled during bus mastering calculations and
+ * for C2/C3 transitions.
+ */
+ local_irq_disable();
+
+ if ( softirq_pending(smp_processor_id()) )
+ {
+ local_irq_enable();
sched_tick_resume();
cpufreq_dbs_timer_resume();
return;
(unsigned char *)&d);
}
+/*
+ * Synchronise v->is_urgent and the per-CPU urgent_count with the vCPU's
+ * membership of its domain's poll_mask.  A vCPU is urgent while it is
+ * polling an event channel; idle vCPUs are never urgent.
+ *
+ * Caller must hold the schedule lock for v->processor (as asserted by
+ * vcpu_runstate_change(), the call site): the lock protects v->is_urgent,
+ * while urgent_count itself is updated atomically so that remote CPUs
+ * may read it locklessly via sched_has_urgent_vcpu().
+ */
+static inline void vcpu_urgent_count_update(struct vcpu *v)
+{
+    if ( is_idle_vcpu(v) )
+        return;
+
+    if ( unlikely(v->is_urgent) )
+    {
+        /* No longer polling: drop the urgency accounting. */
+        if ( !test_bit(v->vcpu_id, v->domain->poll_mask) )
+        {
+            v->is_urgent = 0;
+            atomic_dec(&per_cpu(schedule_data,v->processor).urgent_count);
+        }
+    }
+    else
+    {
+        /* Started polling: mark urgent so this CPU avoids deep C-states. */
+        if ( unlikely(test_bit(v->vcpu_id, v->domain->poll_mask)) )
+        {
+            v->is_urgent = 1;
+            atomic_inc(&per_cpu(schedule_data,v->processor).urgent_count);
+        }
+    }
+}
+
static inline void vcpu_runstate_change(
struct vcpu *v, int new_state, s_time_t new_entry_time)
{
ASSERT(v->runstate.state != new_state);
ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
+ vcpu_urgent_count_update(v);
+
trace_runstate_change(v, new_state);
delta = new_entry_time - v->runstate.state_entry_time;
kill_timer(&v->periodic_timer);
kill_timer(&v->singleshot_timer);
kill_timer(&v->poll_timer);
+ if ( test_and_clear_bool(v->is_urgent) )
+ atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
SCHED_OP(destroy_vcpu, v);
}
static void vcpu_migrate(struct vcpu *v)
{
unsigned long flags;
- int old_cpu;
+ int old_cpu, new_cpu;
vcpu_schedule_lock_irqsave(v, flags);
return;
}
- /* Switch to new CPU, then unlock old CPU. */
+ /* Select new CPU. */
old_cpu = v->processor;
- v->processor = SCHED_OP(pick_cpu, v);
+ new_cpu = SCHED_OP(pick_cpu, v);
+
+ /*
+ * Transfer urgency status to new CPU before switching CPUs, as once
+ * the switch occurs, v->is_urgent is no longer protected by the per-CPU
+ * scheduler lock we are holding.
+ */
+ if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) )
+ {
+ atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count);
+ atomic_dec(&per_cpu(schedule_data, old_cpu).urgent_count);
+ }
+
+ /* Switch to new CPU, then unlock old CPU. */
+ v->processor = new_cpu;
spin_unlock_irqrestore(
&per_cpu(schedule_data, old_cpu).schedule_lock, flags);